Importing Libraries
In [3]:
import pandas as pd
import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
import time
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.applications import InceptionV3, DenseNet169, ResNet101
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
import cv2
from tensorflow.keras.models import Model
from scipy.stats import mode
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
Loading Dataset
In [4]:
# Load the Fetal Planes (Zenodo) metadata table; the CSV uses ';' as delimiter.
# NOTE(review): relative paths assume the notebook's working directory is the
# user's home directory — consider a configurable DATA_DIR for portability.
df = pd.read_csv('Downloads/FETAL_PLANES_ZENODO/FETAL_PLANES_DB_data.csv', delimiter=';')
image_folder = 'Downloads/FETAL_PLANES_ZENODO/Images'
Preprocessing Images (Loading and Normalisation)
In [5]:
"""
Loadsing and preprocessing images in batches from a DataFrame.
Each image is resized to 128x128 pixels, normalized, and its label is converted to a one-hot encoding.
The `load_image` function loads and preprocesses individual images,
while `batch_image_loader` iterates through the DataFrame in chunks, yielding batches of images and labels.
Finally, all batches are combined into arrays, and the total shapes are printed.
"""
IMG_SIZE = 128
BATCH_SIZE = 250
def load_image(image_name):
    """Read one image from `image_folder`, resize it to IMG_SIZE x IMG_SIZE and
    scale pixel values to [0, 1].

    Returns the preprocessed image as a float array, or None (after printing a
    warning) when the file is missing or unreadable.
    NOTE(review): cv2.imread returns BGR channel order — downstream display
    with matplotlib assumes RGB; confirm whether a conversion is intended.
    """
    img_path = os.path.join(image_folder, image_name + '.png')
    loaded = cv2.imread(img_path)
    if loaded is None:
        # Best-effort: skip unreadable files rather than aborting the whole run.
        print(f"Failed to load image: {img_path}")
        return None
    resized = cv2.resize(loaded, (IMG_SIZE, IMG_SIZE))
    return resized / 255.0
def batch_image_loader(df, batch_size=BATCH_SIZE, num_classes=6):
    """Yield (images, one_hot_labels) batches for the rows of `df`.

    Bug fix: labels are now encoded with a single mapping computed once from
    the whole DataFrame (sorted class names). The previous per-batch
    `astype('category').cat.codes` re-derived the code for each chunk, so a
    class received a *different* integer in any batch that did not contain
    all classes — silently corrupting the training labels.

    Parameters
    ----------
    df : DataFrame with 'Image_name' and 'Plane' columns.
    batch_size : number of rows per yielded batch.
    num_classes : width of the one-hot encoding (default 6, as before).
    """
    # One consistent label -> index mapping used for every batch.
    class_names = sorted(pd.unique(df['Plane']))
    label_to_index = {name: idx for idx, name in enumerate(class_names)}
    total_images = len(df)
    for start in range(0, total_images, batch_size):
        batch_df = df.iloc[start:start + batch_size]
        batch_images = []
        batch_labels = []
        for img_name, label in zip(batch_df['Image_name'], batch_df['Plane']):
            img = load_image(img_name)
            if img is not None:  # skip unreadable files, keeping images/labels aligned
                batch_images.append(img)
                batch_labels.append(label_to_index[label])
        batch_images = np.array(batch_images)
        batch_labels = to_categorical(batch_labels, num_classes=num_classes)
        yield batch_images, batch_labels
# Stream every batch from the generator, then stack into single contiguous arrays.
num_images_to_load = int(len(df))
image_batches, label_batches = [], []
for images, labels in batch_image_loader(df.iloc[:num_images_to_load]):
    image_batches.append(images)
    label_batches.append(labels)
all_images = np.concatenate(image_batches, axis=0)
all_labels = np.concatenate(label_batches, axis=0)
print("Total images shape:", all_images.shape)
print("Total labels shape:", all_labels.shape)
Total images shape: (12400, 128, 128, 3) Total labels shape: (12400, 6)
Train–Test Split
In [8]:
# 80/20 train/validation split with a fixed seed for reproducibility.
# NOTE(review): labels are one-hot and the split is not stratified — consider
# stratify=all_labels.argmax(axis=1) if class balance matters; confirm with owner.
X_train, X_val, y_train, y_val = train_test_split(all_images, all_labels, test_size=0.2, random_state=42)
In [ ]:
# # Data Augmentation (for training set)
# train_datagen = ImageDataGenerator(rotation_range=20, zoom_range=0.15, horizontal_flip=True)
# val_datagen = ImageDataGenerator() # No augmentation for validation
# train_gen = train_datagen.flow(X_train, y_train, batch_size=32)
# val_gen = val_datagen.flow(X_val, y_val, batch_size=32)
In [9]:
# Sanity check: expect 9920 training samples (80% of 12400) with 6-way one-hot labels.
print(X_train.shape)
print(y_train.shape)
(9920, 128, 128, 3) (9920, 6)
In [20]:
y_train[0]  # one-hot label of the first training sample
Out[20]:
array([0., 0., 0., 1., 0., 0.])
In [29]:
"""
Generating Gradcams for different models
"""
def apply_grad_cam(model, image, layer_name):
grad_model = Model(inputs=model.input, outputs=[model.output, model.get_layer(layer_name).output])
with tf.GradientTape() as tape:
model_out, conv_out = grad_model(np.expand_dims(image, axis=0))
loss = model_out[:, np.argmax(model_out[0])]
grads = tape.gradient(loss, conv_out)
pooled_grads = tf.reduce_mean(grads, axis=(0, 1))
heatmap = conv_out[0] @ pooled_grads[..., tf.newaxis]
heatmap = tf.maximum(heatmap, 0) / tf.reduce_max(heatmap)
return heatmap.numpy()
def display_grad_cam(original_image, heatmap, alpha=0.5):
    """
    Display original image and Grad-CAM heatmap overlay side by side.

    Bug fix: cv2.applyColorMap returns BGR, but matplotlib expects RGB, so the
    colormap is converted before blending — previously the JET overlay was
    shown with red/blue swapped. The blended image is also clipped to [0, 1]
    so matplotlib does not warn on out-of-range floats.

    Parameters
    ----------
    original_image : float image in [0, 1], shape (H, W, 3).
        NOTE(review): images loaded via cv2.imread are BGR; the base image's
        own channels are shown as-is (mostly grayscale ultrasound, so the
        effect is minimal) — confirm whether a conversion is wanted here too.
    heatmap : Grad-CAM map from apply_grad_cam.
    alpha : heatmap opacity in the overlay.
    """
    heatmap_resized = cv2.resize(heatmap, (original_image.shape[1], original_image.shape[0]))
    heatmap_resized = np.clip(heatmap_resized, 0, 1)
    heatmap_colored = cv2.applyColorMap(np.uint8(255 * heatmap_resized), cv2.COLORMAP_JET)
    heatmap_colored = cv2.cvtColor(heatmap_colored, cv2.COLOR_BGR2RGB)  # BGR -> RGB for matplotlib
    heatmap_colored = np.float32(heatmap_colored) / 255
    overlayed_image = np.clip(alpha * heatmap_colored + (1 - alpha) * original_image, 0, 1)
    plt.figure(figsize=(12, 6))
    plt.subplot(1, 2, 1)
    plt.imshow(original_image)
    plt.title('Original Image')
    plt.axis('off')
    plt.subplot(1, 2, 2)
    plt.imshow(overlayed_image)
    plt.title('Grad-CAM Heatmap')
    plt.axis('off')
    plt.show()
In [30]:
"""
Collecting instances of correct and wrong for analysis
"""
def collect_instances(y_true, y_pred, num_correct=2, num_incorrect=2):
instances = {}
for class_label in np.unique(y_true):
instances[class_label] = {'correct': [], 'incorrect': []}
for index, (true_label, pred_label) in enumerate(zip(y_true, y_pred)):
if true_label == pred_label:
if len(instances[true_label]['correct']) < num_correct:
instances[true_label]['correct'].append(index)
else:
if len(instances[true_label]['incorrect']) < num_incorrect:
instances[true_label]['incorrect'].append(index)
return instances
Experiment 1: Fine-tuning CNN Models
In [88]:
# Function to create InceptionV3 model
def create_inception_v3_model(input_shape=(128, 128, 3), num_classes=6):
    """InceptionV3 backbone (frozen, ImageNet weights) with a small dense
    classification head: GAP -> Dense(128, relu) -> Dropout(0.5) -> softmax."""
    backbone = InceptionV3(weights='imagenet', include_top=False, input_shape=input_shape)
    backbone.trainable = False  # only the new head is trained initially
    head = layers.GlobalAveragePooling2D()(backbone.output)
    head = layers.Dense(128, activation='relu')(head)
    head = layers.Dropout(0.5)(head)
    predictions = layers.Dense(num_classes, activation='softmax')(head)
    return models.Model(inputs=backbone.input, outputs=predictions)
# Function to create DenseNet169 model
def create_densenet169_model(input_shape=(128, 128, 3), num_classes=6):
    """DenseNet169 backbone (frozen, ImageNet weights) with the same dense
    head as the other factories: GAP -> Dense(128, relu) -> Dropout -> softmax."""
    backbone = DenseNet169(weights='imagenet', include_top=False, input_shape=input_shape)
    backbone.trainable = False  # only the new head is trained initially
    head = layers.GlobalAveragePooling2D()(backbone.output)
    head = layers.Dense(128, activation='relu')(head)
    head = layers.Dropout(0.5)(head)
    predictions = layers.Dense(num_classes, activation='softmax')(head)
    return models.Model(inputs=backbone.input, outputs=predictions)
# Function to create ResNet101 model
def create_resnet101_model(input_shape=(128, 128, 3), num_classes=6):
    """ResNet101 backbone (frozen, ImageNet weights) with the same dense head
    as the other model factories.

    Consistency fix: uses the `ResNet101` name imported at the top of the file
    instead of the fully qualified `tf.keras.applications.ResNet101`, matching
    `create_inception_v3_model` / `create_densenet169_model`.
    """
    base_model = ResNet101(weights='imagenet', include_top=False, input_shape=input_shape)
    base_model.trainable = False  # freeze backbone; only the head trains initially
    x = base_model.output
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(num_classes, activation='softmax')(x)
    model = models.Model(inputs=base_model.input, outputs=outputs)
    return model
def train_and_evaluate_model(model, X_train, y_train, X_val, y_val, epochs=90, batch_size=32):
    """Compile `model` (Adam, categorical cross-entropy), fit it on the given
    data, evaluate on the validation set, and return (model, val_accuracy).

    Fix: the fit history was bound to an unused local (`history`); the call is
    kept but the dead binding is removed.
    """
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train,
              validation_data=(X_val, y_val),
              epochs=epochs,
              batch_size=batch_size)
    val_loss, val_accuracy = model.evaluate(X_val, y_val)
    print(f'Validation loss: {val_loss}, Validation accuracy: {val_accuracy}')
    return model, val_accuracy
def fine_tune_model(model, last_layers_to_unfreeze=30, epochs=40, learning_rate=1e-4, patience=5,
                    train_data=None, val_data=None):
    """Unfreeze the last `last_layers_to_unfreeze` layers and continue training
    with a low learning rate and early stopping (best weights restored).

    Generalization/fix: the training and validation sets can now be passed in
    explicitly as `(X, y)` tuples. When omitted, they fall back to the
    module-level X_train/y_train/X_val/y_val globals that the original
    version silently depended on — existing calls are unaffected.
    Also removes the unused `fine_tune_history` local.
    """
    X_tr, y_tr = train_data if train_data is not None else (X_train, y_train)
    X_v, y_v = val_data if val_data is not None else (X_val, y_val)
    for layer in model.layers[-last_layers_to_unfreeze:]:
        layer.trainable = True
    # Re-compile after changing trainability so the optimizer state is rebuilt.
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    early_stopping = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        patience=patience,
        restore_best_weights=True
    )
    model.fit(X_tr, y_tr,
              validation_data=(X_v, y_v),
              epochs=epochs,
              batch_size=32,
              callbacks=[early_stopping])
    return model
def evaluate_model(model, X_val, y_val):
    """Print a classification report and plot a confusion-matrix heatmap for
    `model` on the validation set. One-hot labels are converted to class ids."""
    # Accept either one-hot or integer labels.
    if len(y_val.shape) > 1 and y_val.shape[1] > 1:
        y_val = np.argmax(y_val, axis=-1)
    predictions = model.predict(X_val)
    y_pred = np.argmax(predictions, axis=-1)
    print("Classification Report:\n", classification_report(y_val, y_pred))
    confusion = confusion_matrix(y_val, y_pred)
    plt.figure(figsize=(8, 6))
    sns.heatmap(confusion, annot=True, fmt='d', cmap='Blues', xticklabels=np.arange(6), yticklabels=np.arange(6))
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.title('Confusion Matrix')
    plt.show()
# Experiment driver: for each backbone — train the frozen-backbone head,
# fine-tune the last layers, then report validation metrics.
# NOTE(review): the module-level names (inception_model, densenet_model,
# resnet_model) appear to be reused by later cells (e.g. Grad-CAM), so the
# per-model structure is kept rather than refactored into a loop.
inception_model = create_inception_v3_model()
print("Training InceptionV3 Model...")
inception_model, inception_accuracy = train_and_evaluate_model(inception_model, X_train, y_train, X_val, y_val)
print("Fine-tuning InceptionV3 Model...")
inception_model = fine_tune_model(inception_model)
print("Evaluating InceptionV3 Model...")
evaluate_model(inception_model, X_val, y_val)
densenet_model = create_densenet169_model()
print("Training DenseNet169 Model...")
densenet_model, densenet_accuracy = train_and_evaluate_model(densenet_model, X_train, y_train, X_val, y_val)
print("Fine-tuning DenseNet169 Model...")
densenet_model = fine_tune_model(densenet_model)
print("Evaluating DenseNet169 Model...")
evaluate_model(densenet_model, X_val, y_val)
resnet_model = create_resnet101_model()
print("Training ResNet101 Model...")
resnet_model, resnet_accuracy = train_and_evaluate_model(resnet_model, X_train, y_train, X_val, y_val)
print("Fine-tuning ResNet101 Model...")
resnet_model = fine_tune_model(resnet_model)
print("Evaluating ResNet101 Model...")
evaluate_model(resnet_model, X_val, y_val)
Training InceptionV3 Model... Epoch 1/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 68s 206ms/step - accuracy: 0.5589 - loss: 1.4293 - val_accuracy: 0.7524 - val_loss: 0.6432 Epoch 2/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 63s 204ms/step - accuracy: 0.7263 - loss: 0.7505 - val_accuracy: 0.7879 - val_loss: 0.5848 Epoch 3/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 73s 237ms/step - accuracy: 0.7580 - loss: 0.6481 - val_accuracy: 0.7956 - val_loss: 0.5702 Epoch 4/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 89s 287ms/step - accuracy: 0.7691 - loss: 0.6175 - val_accuracy: 0.8153 - val_loss: 0.5232 Epoch 5/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 267ms/step - accuracy: 0.7798 - loss: 0.5950 - val_accuracy: 0.8129 - val_loss: 0.5296 Epoch 6/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 277ms/step - accuracy: 0.7927 - loss: 0.5659 - val_accuracy: 0.8101 - val_loss: 0.5191 Epoch 7/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 273ms/step - accuracy: 0.7963 - loss: 0.5309 - val_accuracy: 0.8278 - val_loss: 0.4943 Epoch 8/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 272ms/step - accuracy: 0.8059 - loss: 0.5180 - val_accuracy: 0.8339 - val_loss: 0.4733 Epoch 9/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 273ms/step - accuracy: 0.8133 - loss: 0.4995 - val_accuracy: 0.8363 - val_loss: 0.4733 Epoch 10/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 273ms/step - accuracy: 0.8202 - loss: 0.4896 - val_accuracy: 0.8206 - val_loss: 0.5436 Epoch 11/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 275ms/step - accuracy: 0.8251 - loss: 0.4766 - val_accuracy: 0.8371 - val_loss: 0.4838 Epoch 12/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 276ms/step - accuracy: 0.8301 - loss: 0.4518 - val_accuracy: 0.8379 - val_loss: 0.4819 Epoch 13/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 275ms/step - accuracy: 0.8276 - loss: 0.4679 - val_accuracy: 0.8351 - val_loss: 0.4885 Epoch 14/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 88s 284ms/step - accuracy: 0.8400 - loss: 0.4381 - val_accuracy: 0.8367 - val_loss: 0.4815 Epoch 15/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 276ms/step - accuracy: 0.8332 - loss: 0.4312 - val_accuracy: 0.8355 - val_loss: 
0.4795 Epoch 16/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 278ms/step - accuracy: 0.8414 - loss: 0.4159 - val_accuracy: 0.8278 - val_loss: 0.4868 Epoch 17/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 278ms/step - accuracy: 0.8437 - loss: 0.4123 - val_accuracy: 0.8335 - val_loss: 0.4912 Epoch 18/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 87s 281ms/step - accuracy: 0.8379 - loss: 0.4194 - val_accuracy: 0.8407 - val_loss: 0.4981 Epoch 19/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 88s 285ms/step - accuracy: 0.8430 - loss: 0.4049 - val_accuracy: 0.8440 - val_loss: 0.4974 Epoch 20/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 90s 291ms/step - accuracy: 0.8591 - loss: 0.3669 - val_accuracy: 0.8448 - val_loss: 0.4805 Epoch 21/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 88s 285ms/step - accuracy: 0.8500 - loss: 0.3797 - val_accuracy: 0.8379 - val_loss: 0.5160 Epoch 22/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 79s 254ms/step - accuracy: 0.8606 - loss: 0.3657 - val_accuracy: 0.8411 - val_loss: 0.4797 Epoch 23/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 267ms/step - accuracy: 0.8600 - loss: 0.3567 - val_accuracy: 0.8407 - val_loss: 0.5088 Epoch 24/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 267ms/step - accuracy: 0.8555 - loss: 0.3610 - val_accuracy: 0.8472 - val_loss: 0.4947 Epoch 25/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 268ms/step - accuracy: 0.8578 - loss: 0.3585 - val_accuracy: 0.8379 - val_loss: 0.5458 Epoch 26/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 94s 302ms/step - accuracy: 0.8642 - loss: 0.3400 - val_accuracy: 0.8508 - val_loss: 0.5235 Epoch 27/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 99s 318ms/step - accuracy: 0.8589 - loss: 0.3499 - val_accuracy: 0.8290 - val_loss: 0.5893 Epoch 28/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 88s 283ms/step - accuracy: 0.8654 - loss: 0.3311 - val_accuracy: 0.8540 - val_loss: 0.5400 Epoch 29/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 269ms/step - accuracy: 0.8691 - loss: 0.3272 - val_accuracy: 0.8504 - val_loss: 0.5416 Epoch 30/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 271ms/step - accuracy: 0.8640 - loss: 0.3312 - val_accuracy: 0.8367 - val_loss: 0.5829 Epoch 31/90 
310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 267ms/step - accuracy: 0.8687 - loss: 0.3214 - val_accuracy: 0.8371 - val_loss: 0.5197 Epoch 32/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 270ms/step - accuracy: 0.8584 - loss: 0.3470 - val_accuracy: 0.8323 - val_loss: 0.5709 Epoch 33/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 277ms/step - accuracy: 0.8675 - loss: 0.3351 - val_accuracy: 0.8476 - val_loss: 0.5622 Epoch 34/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 103s 333ms/step - accuracy: 0.8778 - loss: 0.3050 - val_accuracy: 0.8544 - val_loss: 0.5418 Epoch 35/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 275ms/step - accuracy: 0.8821 - loss: 0.3008 - val_accuracy: 0.8468 - val_loss: 0.5600 Epoch 36/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 277ms/step - accuracy: 0.8696 - loss: 0.3122 - val_accuracy: 0.8335 - val_loss: 0.6255 Epoch 37/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 276ms/step - accuracy: 0.8680 - loss: 0.3228 - val_accuracy: 0.8427 - val_loss: 0.5683 Epoch 38/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 272ms/step - accuracy: 0.8762 - loss: 0.3030 - val_accuracy: 0.8472 - val_loss: 0.6577 Epoch 39/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 274ms/step - accuracy: 0.8819 - loss: 0.2877 - val_accuracy: 0.8532 - val_loss: 0.6139 Epoch 40/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 103s 333ms/step - accuracy: 0.8791 - loss: 0.2838 - val_accuracy: 0.8480 - val_loss: 0.5927 Epoch 41/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 87s 279ms/step - accuracy: 0.8825 - loss: 0.2944 - val_accuracy: 0.8444 - val_loss: 0.5709 Epoch 42/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 278ms/step - accuracy: 0.8649 - loss: 0.3135 - val_accuracy: 0.8452 - val_loss: 0.6487 Epoch 43/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 271ms/step - accuracy: 0.8865 - loss: 0.2760 - val_accuracy: 0.8278 - val_loss: 0.7565 Epoch 44/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 93s 300ms/step - accuracy: 0.8862 - loss: 0.2835 - val_accuracy: 0.8367 - val_loss: 0.6911 Epoch 45/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 277ms/step - accuracy: 0.8661 - loss: 0.3163 - val_accuracy: 0.8423 - val_loss: 0.6345 Epoch 46/90 310/310 
━━━━━━━━━━━━━━━━━━━━ 88s 284ms/step - accuracy: 0.8872 - loss: 0.2603 - val_accuracy: 0.8480 - val_loss: 0.5753 Epoch 47/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 268ms/step - accuracy: 0.8763 - loss: 0.2947 - val_accuracy: 0.8512 - val_loss: 0.6272 Epoch 48/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 82s 264ms/step - accuracy: 0.8882 - loss: 0.2730 - val_accuracy: 0.8488 - val_loss: 0.6278 Epoch 49/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 82s 266ms/step - accuracy: 0.8871 - loss: 0.2670 - val_accuracy: 0.8411 - val_loss: 0.6973 Epoch 50/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 90s 290ms/step - accuracy: 0.8854 - loss: 0.2625 - val_accuracy: 0.8435 - val_loss: 0.6312 Epoch 51/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 105s 338ms/step - accuracy: 0.8822 - loss: 0.2687 - val_accuracy: 0.8480 - val_loss: 0.6730 Epoch 52/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 102s 330ms/step - accuracy: 0.8953 - loss: 0.2591 - val_accuracy: 0.8419 - val_loss: 0.7665 Epoch 53/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 279ms/step - accuracy: 0.8785 - loss: 0.2720 - val_accuracy: 0.8464 - val_loss: 0.6791 Epoch 54/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 92s 298ms/step - accuracy: 0.8889 - loss: 0.2465 - val_accuracy: 0.8468 - val_loss: 0.6850 Epoch 55/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 87s 280ms/step - accuracy: 0.8954 - loss: 0.2493 - val_accuracy: 0.8488 - val_loss: 0.6364 Epoch 56/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 89s 286ms/step - accuracy: 0.8865 - loss: 0.2737 - val_accuracy: 0.8492 - val_loss: 0.6668 Epoch 57/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 91s 295ms/step - accuracy: 0.8912 - loss: 0.2457 - val_accuracy: 0.8468 - val_loss: 0.6285 Epoch 58/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 277ms/step - accuracy: 0.8821 - loss: 0.2683 - val_accuracy: 0.8460 - val_loss: 0.6742 Epoch 59/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 267ms/step - accuracy: 0.8849 - loss: 0.2758 - val_accuracy: 0.8492 - val_loss: 0.6668 Epoch 60/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 274ms/step - accuracy: 0.9009 - loss: 0.2388 - val_accuracy: 0.8472 - val_loss: 0.6940 Epoch 61/90 310/310 
━━━━━━━━━━━━━━━━━━━━ 83s 269ms/step - accuracy: 0.8823 - loss: 0.2625 - val_accuracy: 0.8323 - val_loss: 0.7449 Epoch 62/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 87s 281ms/step - accuracy: 0.8831 - loss: 0.2751 - val_accuracy: 0.8512 - val_loss: 0.7278 Epoch 63/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 268ms/step - accuracy: 0.8913 - loss: 0.2565 - val_accuracy: 0.8452 - val_loss: 0.6867 Epoch 64/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 131s 422ms/step - accuracy: 0.8882 - loss: 0.2444 - val_accuracy: 0.8512 - val_loss: 0.7799 Epoch 65/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 98s 315ms/step - accuracy: 0.8974 - loss: 0.2327 - val_accuracy: 0.8444 - val_loss: 0.7248 Epoch 66/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 269ms/step - accuracy: 0.8982 - loss: 0.2387 - val_accuracy: 0.8556 - val_loss: 0.7745 Epoch 67/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 91s 295ms/step - accuracy: 0.9005 - loss: 0.2353 - val_accuracy: 0.8488 - val_loss: 0.7831 Epoch 68/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 94s 302ms/step - accuracy: 0.9046 - loss: 0.2192 - val_accuracy: 0.8496 - val_loss: 0.7343 Epoch 69/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 97s 313ms/step - accuracy: 0.8996 - loss: 0.2386 - val_accuracy: 0.8456 - val_loss: 0.7461 Epoch 70/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 135s 437ms/step - accuracy: 0.8951 - loss: 0.2456 - val_accuracy: 0.8423 - val_loss: 0.7911 Epoch 71/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 96s 308ms/step - accuracy: 0.8978 - loss: 0.2327 - val_accuracy: 0.8516 - val_loss: 0.7370 Epoch 72/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 91s 295ms/step - accuracy: 0.8987 - loss: 0.2361 - val_accuracy: 0.8460 - val_loss: 0.7647 Epoch 73/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 93s 300ms/step - accuracy: 0.8957 - loss: 0.2492 - val_accuracy: 0.8411 - val_loss: 0.7945 Epoch 74/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 93s 301ms/step - accuracy: 0.8968 - loss: 0.2456 - val_accuracy: 0.8496 - val_loss: 0.7682 Epoch 75/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 92s 297ms/step - accuracy: 0.9011 - loss: 0.2253 - val_accuracy: 0.8496 - val_loss: 0.7115 Epoch 76/90 310/310 
━━━━━━━━━━━━━━━━━━━━ 83s 268ms/step - accuracy: 0.8980 - loss: 0.2326 - val_accuracy: 0.8423 - val_loss: 0.7297 Epoch 77/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 91s 292ms/step - accuracy: 0.8947 - loss: 0.2427 - val_accuracy: 0.8524 - val_loss: 0.6876 Epoch 78/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 86s 278ms/step - accuracy: 0.9018 - loss: 0.2282 - val_accuracy: 0.8435 - val_loss: 0.8122 Epoch 79/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 274ms/step - accuracy: 0.8918 - loss: 0.2417 - val_accuracy: 0.8476 - val_loss: 0.7391 Epoch 80/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 268ms/step - accuracy: 0.8982 - loss: 0.2338 - val_accuracy: 0.8472 - val_loss: 0.8092 Epoch 81/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 273ms/step - accuracy: 0.8979 - loss: 0.2317 - val_accuracy: 0.8423 - val_loss: 0.7760 Epoch 82/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 87s 282ms/step - accuracy: 0.9008 - loss: 0.2232 - val_accuracy: 0.8544 - val_loss: 0.8113 Epoch 83/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 275ms/step - accuracy: 0.9051 - loss: 0.2111 - val_accuracy: 0.8464 - val_loss: 0.8143 Epoch 84/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 87s 280ms/step - accuracy: 0.9064 - loss: 0.2177 - val_accuracy: 0.8516 - val_loss: 0.7742 Epoch 85/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 271ms/step - accuracy: 0.9020 - loss: 0.2258 - val_accuracy: 0.8532 - val_loss: 0.8618 Epoch 86/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 274ms/step - accuracy: 0.9002 - loss: 0.2232 - val_accuracy: 0.8411 - val_loss: 0.9017 Epoch 87/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 272ms/step - accuracy: 0.9084 - loss: 0.2173 - val_accuracy: 0.8472 - val_loss: 0.9171 Epoch 88/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 275ms/step - accuracy: 0.9018 - loss: 0.2240 - val_accuracy: 0.8472 - val_loss: 0.8397 Epoch 89/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 274ms/step - accuracy: 0.8988 - loss: 0.2272 - val_accuracy: 0.8516 - val_loss: 0.8538 Epoch 90/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 274ms/step - accuracy: 0.9039 - loss: 0.2123 - val_accuracy: 0.8435 - val_loss: 0.8162 78/78 ━━━━━━━━━━━━━━━━━━━━ 16s 
207ms/step - accuracy: 0.8480 - loss: 0.7882 Validation loss: 0.8162376880645752, Validation accuracy: 0.8435483574867249 Fine-tuning InceptionV3 Model... Epoch 1/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 101s 301ms/step - accuracy: 0.8091 - loss: 0.6407 - val_accuracy: 0.8383 - val_loss: 0.6011 Epoch 2/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 85s 274ms/step - accuracy: 0.8670 - loss: 0.3790 - val_accuracy: 0.8472 - val_loss: 0.5431 Epoch 3/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 83s 267ms/step - accuracy: 0.8928 - loss: 0.2788 - val_accuracy: 0.8577 - val_loss: 0.5648 Epoch 4/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 269ms/step - accuracy: 0.9038 - loss: 0.2527 - val_accuracy: 0.8540 - val_loss: 0.6352 Epoch 5/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 271ms/step - accuracy: 0.9137 - loss: 0.2215 - val_accuracy: 0.8601 - val_loss: 0.6209 Epoch 6/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 270ms/step - accuracy: 0.9162 - loss: 0.2157 - val_accuracy: 0.8706 - val_loss: 0.6139 Epoch 7/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 84s 272ms/step - accuracy: 0.9290 - loss: 0.1918 - val_accuracy: 0.8718 - val_loss: 0.6502 Evaluating InceptionV3 Model... 78/78 ━━━━━━━━━━━━━━━━━━━━ 17s 214ms/step Classification Report: precision recall f1-score support 0 0.87 0.75 0.80 417 1 0.95 0.96 0.96 629 2 0.76 0.80 0.78 201 3 0.86 0.72 0.79 319 4 0.91 0.87 0.89 309 5 0.74 0.87 0.80 605 accuracy 0.85 2480 macro avg 0.85 0.83 0.84 2480 weighted avg 0.85 0.85 0.85 2480
Training DenseNet169 Model... Epoch 1/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 179s 527ms/step - accuracy: 0.6298 - loss: 1.0922 - val_accuracy: 0.8278 - val_loss: 0.5079 Epoch 2/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 153s 495ms/step - accuracy: 0.8051 - loss: 0.5609 - val_accuracy: 0.8399 - val_loss: 0.4328 Epoch 3/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 184s 595ms/step - accuracy: 0.8369 - loss: 0.4811 - val_accuracy: 0.8544 - val_loss: 0.4110 Epoch 4/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 184s 593ms/step - accuracy: 0.8420 - loss: 0.4452 - val_accuracy: 0.8657 - val_loss: 0.3954 Epoch 5/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 156s 502ms/step - accuracy: 0.8609 - loss: 0.4090 - val_accuracy: 0.8641 - val_loss: 0.4065 Epoch 6/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 161s 519ms/step - accuracy: 0.8638 - loss: 0.3823 - val_accuracy: 0.8665 - val_loss: 0.3799 Epoch 7/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 153s 494ms/step - accuracy: 0.8635 - loss: 0.3700 - val_accuracy: 0.8710 - val_loss: 0.3743 Epoch 8/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 490ms/step - accuracy: 0.8758 - loss: 0.3402 - val_accuracy: 0.8734 - val_loss: 0.3678 Epoch 9/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 164s 530ms/step - accuracy: 0.8764 - loss: 0.3315 - val_accuracy: 0.8827 - val_loss: 0.3628 Epoch 10/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 161s 519ms/step - accuracy: 0.8952 - loss: 0.2900 - val_accuracy: 0.8819 - val_loss: 0.3651 Epoch 11/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 153s 492ms/step - accuracy: 0.8953 - loss: 0.2893 - val_accuracy: 0.8742 - val_loss: 0.3720 Epoch 12/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 159s 512ms/step - accuracy: 0.8799 - loss: 0.3144 - val_accuracy: 0.8806 - val_loss: 0.3556 Epoch 13/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 155s 500ms/step - accuracy: 0.8959 - loss: 0.2894 - val_accuracy: 0.8746 - val_loss: 0.3610 Epoch 14/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 154s 496ms/step - accuracy: 0.8877 - loss: 0.3017 - val_accuracy: 0.8738 - val_loss: 0.3725 Epoch 15/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 154s 496ms/step - accuracy: 0.9021 - loss: 0.2689 - val_accuracy: 0.8750 - 
val_loss: 0.3858 Epoch 16/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 489ms/step - accuracy: 0.9033 - loss: 0.2613 - val_accuracy: 0.8798 - val_loss: 0.3654 Epoch 17/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 151s 489ms/step - accuracy: 0.9022 - loss: 0.2535 - val_accuracy: 0.8802 - val_loss: 0.3878 Epoch 18/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 490ms/step - accuracy: 0.9117 - loss: 0.2432 - val_accuracy: 0.8895 - val_loss: 0.3787 Epoch 19/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 491ms/step - accuracy: 0.9060 - loss: 0.2475 - val_accuracy: 0.8819 - val_loss: 0.3744 Epoch 20/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 163s 525ms/step - accuracy: 0.9056 - loss: 0.2386 - val_accuracy: 0.8750 - val_loss: 0.3819 Epoch 21/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 171s 550ms/step - accuracy: 0.9097 - loss: 0.2410 - val_accuracy: 0.8810 - val_loss: 0.3787 Epoch 22/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 176s 567ms/step - accuracy: 0.9117 - loss: 0.2265 - val_accuracy: 0.8766 - val_loss: 0.3932 Epoch 23/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 186s 602ms/step - accuracy: 0.9120 - loss: 0.2311 - val_accuracy: 0.8653 - val_loss: 0.4325 Epoch 24/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 208s 672ms/step - accuracy: 0.9156 - loss: 0.2168 - val_accuracy: 0.8718 - val_loss: 0.4764 Epoch 25/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 191s 617ms/step - accuracy: 0.9249 - loss: 0.1949 - val_accuracy: 0.8831 - val_loss: 0.3918 Epoch 26/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 178s 575ms/step - accuracy: 0.9240 - loss: 0.1991 - val_accuracy: 0.8786 - val_loss: 0.4027 Epoch 27/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 156s 504ms/step - accuracy: 0.9232 - loss: 0.1998 - val_accuracy: 0.8827 - val_loss: 0.4058 Epoch 28/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 163s 527ms/step - accuracy: 0.9267 - loss: 0.1971 - val_accuracy: 0.8810 - val_loss: 0.4519 Epoch 29/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 155s 501ms/step - accuracy: 0.9237 - loss: 0.1869 - val_accuracy: 0.8790 - val_loss: 0.4324 Epoch 30/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 156s 502ms/step - accuracy: 0.9297 - loss: 0.1909 - val_accuracy: 0.8871 - 
val_loss: 0.4086 Epoch 31/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 153s 494ms/step - accuracy: 0.9344 - loss: 0.1695 - val_accuracy: 0.8847 - val_loss: 0.4162 Epoch 32/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 161s 520ms/step - accuracy: 0.9264 - loss: 0.1947 - val_accuracy: 0.8855 - val_loss: 0.4191 Epoch 33/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 169s 545ms/step - accuracy: 0.9317 - loss: 0.1732 - val_accuracy: 0.8927 - val_loss: 0.3936 Epoch 34/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 179s 577ms/step - accuracy: 0.9391 - loss: 0.1612 - val_accuracy: 0.8839 - val_loss: 0.4227 Epoch 35/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 160s 516ms/step - accuracy: 0.9417 - loss: 0.1491 - val_accuracy: 0.8859 - val_loss: 0.4194 Epoch 36/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 162s 523ms/step - accuracy: 0.9332 - loss: 0.1644 - val_accuracy: 0.8923 - val_loss: 0.4498 Epoch 37/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 164s 531ms/step - accuracy: 0.9417 - loss: 0.1520 - val_accuracy: 0.8871 - val_loss: 0.4461 Epoch 38/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 201s 650ms/step - accuracy: 0.9326 - loss: 0.1700 - val_accuracy: 0.8843 - val_loss: 0.4561 Epoch 39/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 165s 531ms/step - accuracy: 0.9407 - loss: 0.1536 - val_accuracy: 0.8839 - val_loss: 0.4586 Epoch 40/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 162s 522ms/step - accuracy: 0.9355 - loss: 0.1627 - val_accuracy: 0.8734 - val_loss: 0.5259 Epoch 41/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 160s 518ms/step - accuracy: 0.9422 - loss: 0.1528 - val_accuracy: 0.8871 - val_loss: 0.4301 Epoch 42/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 167s 540ms/step - accuracy: 0.9421 - loss: 0.1469 - val_accuracy: 0.8827 - val_loss: 0.4969 Epoch 43/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 174s 563ms/step - accuracy: 0.9378 - loss: 0.1533 - val_accuracy: 0.8879 - val_loss: 0.4881 Epoch 44/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 174s 562ms/step - accuracy: 0.9458 - loss: 0.1429 - val_accuracy: 0.8815 - val_loss: 0.5017 Epoch 45/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 163s 527ms/step - accuracy: 0.9398 - loss: 0.1580 - val_accuracy: 0.8827 - 
val_loss: 0.4388 Epoch 46/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 163s 528ms/step - accuracy: 0.9490 - loss: 0.1375 - val_accuracy: 0.8915 - val_loss: 0.4796 Epoch 47/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 172s 555ms/step - accuracy: 0.9417 - loss: 0.1533 - val_accuracy: 0.8859 - val_loss: 0.4906 Epoch 48/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 159s 514ms/step - accuracy: 0.9501 - loss: 0.1395 - val_accuracy: 0.8851 - val_loss: 0.5386 Epoch 49/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 161s 520ms/step - accuracy: 0.9478 - loss: 0.1404 - val_accuracy: 0.8871 - val_loss: 0.4832 Epoch 50/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 160s 516ms/step - accuracy: 0.9457 - loss: 0.1375 - val_accuracy: 0.8903 - val_loss: 0.4635 Epoch 51/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 159s 513ms/step - accuracy: 0.9532 - loss: 0.1218 - val_accuracy: 0.8831 - val_loss: 0.5022 Epoch 52/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 158s 508ms/step - accuracy: 0.9461 - loss: 0.1389 - val_accuracy: 0.8867 - val_loss: 0.4925 Epoch 53/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 160s 517ms/step - accuracy: 0.9455 - loss: 0.1432 - val_accuracy: 0.8867 - val_loss: 0.4667 Epoch 54/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 160s 515ms/step - accuracy: 0.9531 - loss: 0.1282 - val_accuracy: 0.8887 - val_loss: 0.4970 Epoch 55/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 154s 497ms/step - accuracy: 0.9427 - loss: 0.1444 - val_accuracy: 0.8810 - val_loss: 0.5412 Epoch 56/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 491ms/step - accuracy: 0.9493 - loss: 0.1300 - val_accuracy: 0.8867 - val_loss: 0.5121 Epoch 57/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 490ms/step - accuracy: 0.9470 - loss: 0.1351 - val_accuracy: 0.8827 - val_loss: 0.5164 Epoch 58/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 153s 494ms/step - accuracy: 0.9546 - loss: 0.1269 - val_accuracy: 0.8859 - val_loss: 0.4578 Epoch 59/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 491ms/step - accuracy: 0.9491 - loss: 0.1320 - val_accuracy: 0.8887 - val_loss: 0.5353 Epoch 60/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 491ms/step - accuracy: 0.9483 - loss: 0.1260 - val_accuracy: 0.8875 - 
val_loss: 0.5081 Epoch 61/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 490ms/step - accuracy: 0.9511 - loss: 0.1244 - val_accuracy: 0.8915 - val_loss: 0.5226 Epoch 62/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 491ms/step - accuracy: 0.9509 - loss: 0.1219 - val_accuracy: 0.8839 - val_loss: 0.5621 Epoch 63/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 153s 493ms/step - accuracy: 0.9493 - loss: 0.1340 - val_accuracy: 0.8867 - val_loss: 0.5186 Epoch 64/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 152s 489ms/step - accuracy: 0.9540 - loss: 0.1211 - val_accuracy: 0.8871 - val_loss: 0.5716 Epoch 65/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 150s 485ms/step - accuracy: 0.9536 - loss: 0.1188 - val_accuracy: 0.8859 - val_loss: 0.5490 Epoch 66/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 151s 486ms/step - accuracy: 0.9529 - loss: 0.1183 - val_accuracy: 0.8819 - val_loss: 0.5418 Epoch 67/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 160s 517ms/step - accuracy: 0.9579 - loss: 0.1109 - val_accuracy: 0.8819 - val_loss: 0.5237 Epoch 68/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 153s 492ms/step - accuracy: 0.9531 - loss: 0.1167 - val_accuracy: 0.8806 - val_loss: 0.5726 Epoch 69/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 151s 488ms/step - accuracy: 0.9519 - loss: 0.1236 - val_accuracy: 0.8823 - val_loss: 0.5846 Epoch 70/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 194s 625ms/step - accuracy: 0.9578 - loss: 0.1106 - val_accuracy: 0.8883 - val_loss: 0.5441 Epoch 71/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 206s 664ms/step - accuracy: 0.9583 - loss: 0.1150 - val_accuracy: 0.8871 - val_loss: 0.5589 Epoch 72/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 172s 556ms/step - accuracy: 0.9549 - loss: 0.1166 - val_accuracy: 0.8863 - val_loss: 0.5367 Epoch 73/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 176s 567ms/step - accuracy: 0.9578 - loss: 0.1120 - val_accuracy: 0.8879 - val_loss: 0.5509 Epoch 74/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 165s 534ms/step - accuracy: 0.9582 - loss: 0.0982 - val_accuracy: 0.8847 - val_loss: 0.6188 Epoch 75/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 163s 526ms/step - accuracy: 0.9486 - loss: 0.1205 - val_accuracy: 0.8891 - 
val_loss: 0.4983 Epoch 76/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 169s 545ms/step - accuracy: 0.9602 - loss: 0.1057 - val_accuracy: 0.8879 - val_loss: 0.5425 Epoch 77/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 165s 533ms/step - accuracy: 0.9577 - loss: 0.1104 - val_accuracy: 0.8798 - val_loss: 0.5772 Epoch 78/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 166s 536ms/step - accuracy: 0.9565 - loss: 0.1138 - val_accuracy: 0.8798 - val_loss: 0.5769 Epoch 79/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 163s 526ms/step - accuracy: 0.9488 - loss: 0.1267 - val_accuracy: 0.8915 - val_loss: 0.5817 Epoch 80/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 165s 533ms/step - accuracy: 0.9623 - loss: 0.1033 - val_accuracy: 0.8859 - val_loss: 0.5619 Epoch 81/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 163s 525ms/step - accuracy: 0.9600 - loss: 0.1018 - val_accuracy: 0.8835 - val_loss: 0.6035 Epoch 82/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 159s 514ms/step - accuracy: 0.9627 - loss: 0.0951 - val_accuracy: 0.8851 - val_loss: 0.6300 Epoch 83/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 163s 526ms/step - accuracy: 0.9601 - loss: 0.1003 - val_accuracy: 0.8887 - val_loss: 0.5795 Epoch 84/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 166s 536ms/step - accuracy: 0.9593 - loss: 0.1012 - val_accuracy: 0.8879 - val_loss: 0.5945 Epoch 85/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 191s 617ms/step - accuracy: 0.9594 - loss: 0.1015 - val_accuracy: 0.8819 - val_loss: 0.6158 Epoch 86/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 166s 537ms/step - accuracy: 0.9570 - loss: 0.1114 - val_accuracy: 0.8851 - val_loss: 0.6133 Epoch 87/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 179s 579ms/step - accuracy: 0.9583 - loss: 0.1052 - val_accuracy: 0.8879 - val_loss: 0.5912 Epoch 88/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 196s 632ms/step - accuracy: 0.9589 - loss: 0.1087 - val_accuracy: 0.8802 - val_loss: 0.6150 Epoch 89/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 184s 594ms/step - accuracy: 0.9604 - loss: 0.1009 - val_accuracy: 0.8859 - val_loss: 0.6249 Epoch 90/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 180s 580ms/step - accuracy: 0.9615 - loss: 0.1012 - val_accuracy: 0.8875 - 
val_loss: 0.6238 78/78 ━━━━━━━━━━━━━━━━━━━━ 34s 440ms/step - accuracy: 0.8914 - loss: 0.5769 Validation loss: 0.6237850189208984, Validation accuracy: 0.887499988079071 Fine-tuning DenseNet169 Model... Epoch 1/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 184s 569ms/step - accuracy: 0.7700 - loss: 2.6160 - val_accuracy: 0.8786 - val_loss: 1.0476 Epoch 2/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 173s 559ms/step - accuracy: 0.9122 - loss: 0.4027 - val_accuracy: 0.8843 - val_loss: 0.8719 Epoch 3/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 185s 598ms/step - accuracy: 0.9314 - loss: 0.2236 - val_accuracy: 0.8915 - val_loss: 0.8170 Epoch 4/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 165s 531ms/step - accuracy: 0.9394 - loss: 0.1993 - val_accuracy: 0.8931 - val_loss: 0.7289 Epoch 5/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 167s 539ms/step - accuracy: 0.9443 - loss: 0.1495 - val_accuracy: 0.8972 - val_loss: 0.7031 Epoch 6/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 172s 553ms/step - accuracy: 0.9493 - loss: 0.1341 - val_accuracy: 0.8988 - val_loss: 0.7296 Epoch 7/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 171s 551ms/step - accuracy: 0.9538 - loss: 0.1240 - val_accuracy: 0.8976 - val_loss: 0.7272 Epoch 8/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 172s 554ms/step - accuracy: 0.9600 - loss: 0.1135 - val_accuracy: 0.8968 - val_loss: 0.7156 Epoch 9/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 175s 565ms/step - accuracy: 0.9540 - loss: 0.1132 - val_accuracy: 0.9024 - val_loss: 0.7302 Epoch 10/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 172s 555ms/step - accuracy: 0.9655 - loss: 0.0977 - val_accuracy: 0.8984 - val_loss: 0.7464 Evaluating DenseNet169 Model... 78/78 ━━━━━━━━━━━━━━━━━━━━ 36s 445ms/step Classification Report: precision recall f1-score support 0 0.87 0.90 0.88 417 1 0.98 0.97 0.98 629 2 0.87 0.82 0.84 201 3 0.86 0.91 0.88 319 4 0.91 0.87 0.89 309 5 0.86 0.85 0.86 605 accuracy 0.90 2480 macro avg 0.89 0.89 0.89 2480 weighted avg 0.90 0.90 0.90 2480
Training ResNet101 Model... Epoch 1/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 296s 915ms/step - accuracy: 0.3209 - loss: 1.6654 - val_accuracy: 0.4863 - val_loss: 1.3497 Epoch 2/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 282s 909ms/step - accuracy: 0.4893 - loss: 1.3308 - val_accuracy: 0.6149 - val_loss: 1.1654 Epoch 3/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 282s 910ms/step - accuracy: 0.5430 - loss: 1.1955 - val_accuracy: 0.6157 - val_loss: 1.0833 Epoch 4/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 375s 1s/step - accuracy: 0.5792 - loss: 1.1281 - val_accuracy: 0.6234 - val_loss: 1.0333 Epoch 5/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 466s 2s/step - accuracy: 0.6021 - loss: 1.0730 - val_accuracy: 0.6427 - val_loss: 0.9924 Epoch 6/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 393s 1s/step - accuracy: 0.6199 - loss: 1.0282 - val_accuracy: 0.6798 - val_loss: 0.9553 Epoch 7/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 381s 1s/step - accuracy: 0.6237 - loss: 1.0122 - val_accuracy: 0.6875 - val_loss: 0.9269 Epoch 8/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 366s 1s/step - accuracy: 0.6357 - loss: 0.9891 - val_accuracy: 0.6915 - val_loss: 0.8699 Epoch 9/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 370s 1s/step - accuracy: 0.6665 - loss: 0.9402 - val_accuracy: 0.6984 - val_loss: 0.8496 Epoch 10/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 329s 1s/step - accuracy: 0.6632 - loss: 0.9240 - val_accuracy: 0.7169 - val_loss: 0.8318 Epoch 11/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 277s 893ms/step - accuracy: 0.6620 - loss: 0.9119 - val_accuracy: 0.7202 - val_loss: 0.8129 Epoch 12/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 323s 1s/step - accuracy: 0.6812 - loss: 0.8925 - val_accuracy: 0.7234 - val_loss: 0.8096 Epoch 13/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 372s 1s/step - accuracy: 0.6813 - loss: 0.8911 - val_accuracy: 0.7266 - val_loss: 0.7856 Epoch 14/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 310s 1s/step - accuracy: 0.6887 - loss: 0.8618 - val_accuracy: 0.7290 - val_loss: 0.7682 Epoch 15/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 351s 1s/step - accuracy: 0.6886 - loss: 0.8559 - val_accuracy: 0.7278 - val_loss: 0.7685 Epoch 16/90 
310/310 ━━━━━━━━━━━━━━━━━━━━ 351s 1s/step - accuracy: 0.6956 - loss: 0.8521 - val_accuracy: 0.7327 - val_loss: 0.7548 Epoch 17/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 282s 912ms/step - accuracy: 0.6957 - loss: 0.8433 - val_accuracy: 0.7363 - val_loss: 0.7533 Epoch 18/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 278s 897ms/step - accuracy: 0.7093 - loss: 0.8202 - val_accuracy: 0.7367 - val_loss: 0.7446 Epoch 19/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 283s 915ms/step - accuracy: 0.7003 - loss: 0.8300 - val_accuracy: 0.7383 - val_loss: 0.7483 Epoch 20/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 320s 1s/step - accuracy: 0.7103 - loss: 0.7929 - val_accuracy: 0.7435 - val_loss: 0.7272 Epoch 21/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 278s 899ms/step - accuracy: 0.7172 - loss: 0.7981 - val_accuracy: 0.7456 - val_loss: 0.7177 Epoch 22/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 278s 897ms/step - accuracy: 0.7149 - loss: 0.8055 - val_accuracy: 0.7391 - val_loss: 0.7212 Epoch 23/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 281s 908ms/step - accuracy: 0.7189 - loss: 0.7778 - val_accuracy: 0.7460 - val_loss: 0.7174 Epoch 24/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 269s 867ms/step - accuracy: 0.7221 - loss: 0.7877 - val_accuracy: 0.7444 - val_loss: 0.7043 Epoch 25/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 304s 981ms/step - accuracy: 0.7228 - loss: 0.7788 - val_accuracy: 0.7480 - val_loss: 0.6961 Epoch 26/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 285s 920ms/step - accuracy: 0.7205 - loss: 0.7752 - val_accuracy: 0.7419 - val_loss: 0.7210 Epoch 27/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 280s 905ms/step - accuracy: 0.7143 - loss: 0.7815 - val_accuracy: 0.7540 - val_loss: 0.6966 Epoch 28/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 268s 865ms/step - accuracy: 0.7320 - loss: 0.7500 - val_accuracy: 0.7504 - val_loss: 0.6924 Epoch 29/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 302s 974ms/step - accuracy: 0.7269 - loss: 0.7614 - val_accuracy: 0.7460 - val_loss: 0.6942 Epoch 30/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 265s 854ms/step - accuracy: 0.7187 - loss: 0.7596 - val_accuracy: 0.7528 - val_loss: 0.6893 Epoch 31/90 310/310 
━━━━━━━━━━━━━━━━━━━━ 264s 854ms/step - accuracy: 0.7344 - loss: 0.7352 - val_accuracy: 0.7544 - val_loss: 0.6888 Epoch 32/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 262s 846ms/step - accuracy: 0.7350 - loss: 0.7465 - val_accuracy: 0.7500 - val_loss: 0.6809 Epoch 33/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 263s 850ms/step - accuracy: 0.7173 - loss: 0.7654 - val_accuracy: 0.7411 - val_loss: 0.6908 Epoch 34/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 288s 931ms/step - accuracy: 0.7310 - loss: 0.7429 - val_accuracy: 0.7496 - val_loss: 0.6682 Epoch 35/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 276s 891ms/step - accuracy: 0.7388 - loss: 0.7353 - val_accuracy: 0.7444 - val_loss: 0.6968 Epoch 36/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 287s 927ms/step - accuracy: 0.7318 - loss: 0.7287 - val_accuracy: 0.7569 - val_loss: 0.6638 Epoch 37/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 267s 861ms/step - accuracy: 0.7367 - loss: 0.7440 - val_accuracy: 0.7524 - val_loss: 0.6693 Epoch 38/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 267s 861ms/step - accuracy: 0.7332 - loss: 0.7255 - val_accuracy: 0.7448 - val_loss: 0.6927 Epoch 39/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 256s 828ms/step - accuracy: 0.7459 - loss: 0.7190 - val_accuracy: 0.7613 - val_loss: 0.6591 Epoch 40/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 261s 843ms/step - accuracy: 0.7276 - loss: 0.7353 - val_accuracy: 0.7589 - val_loss: 0.6525 Epoch 41/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 259s 835ms/step - accuracy: 0.7295 - loss: 0.7435 - val_accuracy: 0.7593 - val_loss: 0.6496 Epoch 42/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 273s 882ms/step - accuracy: 0.7340 - loss: 0.7376 - val_accuracy: 0.7528 - val_loss: 0.6688 Epoch 43/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 321s 1s/step - accuracy: 0.7332 - loss: 0.7344 - val_accuracy: 0.7633 - val_loss: 0.6479 Epoch 44/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 269s 869ms/step - accuracy: 0.7394 - loss: 0.7115 - val_accuracy: 0.7456 - val_loss: 0.6961 Epoch 45/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 841ms/step - accuracy: 0.7335 - loss: 0.7198 - val_accuracy: 0.7577 - val_loss: 0.6507 Epoch 46/90 310/310 
━━━━━━━━━━━━━━━━━━━━ 426s 1s/step - accuracy: 0.7419 - loss: 0.7083 - val_accuracy: 0.7581 - val_loss: 0.6540 Epoch 47/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 335s 1s/step - accuracy: 0.7456 - loss: 0.7108 - val_accuracy: 0.7633 - val_loss: 0.6450 Epoch 48/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 337s 1s/step - accuracy: 0.7426 - loss: 0.7103 - val_accuracy: 0.7617 - val_loss: 0.6453 Epoch 49/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 304s 982ms/step - accuracy: 0.7435 - loss: 0.7173 - val_accuracy: 0.7601 - val_loss: 0.6466 Epoch 50/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 264s 851ms/step - accuracy: 0.7474 - loss: 0.6937 - val_accuracy: 0.7629 - val_loss: 0.6396 Epoch 51/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 252s 813ms/step - accuracy: 0.7404 - loss: 0.7070 - val_accuracy: 0.7609 - val_loss: 0.6363 Epoch 52/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 839ms/step - accuracy: 0.7384 - loss: 0.7268 - val_accuracy: 0.7532 - val_loss: 0.6665 Epoch 53/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 257s 831ms/step - accuracy: 0.7441 - loss: 0.7087 - val_accuracy: 0.7685 - val_loss: 0.6330 Epoch 54/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 257s 828ms/step - accuracy: 0.7385 - loss: 0.7099 - val_accuracy: 0.7581 - val_loss: 0.6441 Epoch 55/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 257s 830ms/step - accuracy: 0.7437 - loss: 0.6985 - val_accuracy: 0.7673 - val_loss: 0.6407 Epoch 56/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 262s 845ms/step - accuracy: 0.7471 - loss: 0.6947 - val_accuracy: 0.7625 - val_loss: 0.6376 Epoch 57/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 262s 844ms/step - accuracy: 0.7530 - loss: 0.6737 - val_accuracy: 0.7528 - val_loss: 0.6647 Epoch 58/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 261s 843ms/step - accuracy: 0.7440 - loss: 0.7115 - val_accuracy: 0.7633 - val_loss: 0.6415 Epoch 59/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 270s 871ms/step - accuracy: 0.7480 - loss: 0.6898 - val_accuracy: 0.7601 - val_loss: 0.6473 Epoch 60/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 259s 837ms/step - accuracy: 0.7534 - loss: 0.6803 - val_accuracy: 0.7690 - val_loss: 0.6282 Epoch 61/90 310/310 
━━━━━━━━━━━━━━━━━━━━ 260s 838ms/step - accuracy: 0.7449 - loss: 0.7010 - val_accuracy: 0.7722 - val_loss: 0.6274 Epoch 62/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 838ms/step - accuracy: 0.7445 - loss: 0.6895 - val_accuracy: 0.7625 - val_loss: 0.6340 Epoch 63/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 838ms/step - accuracy: 0.7481 - loss: 0.6858 - val_accuracy: 0.7665 - val_loss: 0.6312 Epoch 64/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 838ms/step - accuracy: 0.7500 - loss: 0.6884 - val_accuracy: 0.7597 - val_loss: 0.6490 Epoch 65/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 840ms/step - accuracy: 0.7511 - loss: 0.6813 - val_accuracy: 0.7669 - val_loss: 0.6441 Epoch 66/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 263s 848ms/step - accuracy: 0.7567 - loss: 0.6741 - val_accuracy: 0.7637 - val_loss: 0.6424 Epoch 67/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 262s 845ms/step - accuracy: 0.7483 - loss: 0.6706 - val_accuracy: 0.7673 - val_loss: 0.6352 Epoch 68/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 841ms/step - accuracy: 0.7557 - loss: 0.6787 - val_accuracy: 0.7698 - val_loss: 0.6322 Epoch 69/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 259s 837ms/step - accuracy: 0.7546 - loss: 0.6693 - val_accuracy: 0.7685 - val_loss: 0.6214 Epoch 70/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 259s 837ms/step - accuracy: 0.7568 - loss: 0.6722 - val_accuracy: 0.7665 - val_loss: 0.6341 Epoch 71/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 259s 834ms/step - accuracy: 0.7520 - loss: 0.6615 - val_accuracy: 0.7681 - val_loss: 0.6280 Epoch 72/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 840ms/step - accuracy: 0.7525 - loss: 0.6716 - val_accuracy: 0.7714 - val_loss: 0.6248 Epoch 73/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 262s 846ms/step - accuracy: 0.7471 - loss: 0.6794 - val_accuracy: 0.7629 - val_loss: 0.6355 Epoch 74/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 267s 862ms/step - accuracy: 0.7469 - loss: 0.6797 - val_accuracy: 0.7661 - val_loss: 0.6249 Epoch 75/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 264s 851ms/step - accuracy: 0.7590 - loss: 0.6603 - val_accuracy: 0.7734 - val_loss: 0.6102 Epoch 76/90 310/310 
━━━━━━━━━━━━━━━━━━━━ 265s 855ms/step - accuracy: 0.7582 - loss: 0.6605 - val_accuracy: 0.7730 - val_loss: 0.6252 Epoch 77/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 264s 852ms/step - accuracy: 0.7544 - loss: 0.6552 - val_accuracy: 0.7649 - val_loss: 0.6278 Epoch 78/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 264s 853ms/step - accuracy: 0.7587 - loss: 0.6604 - val_accuracy: 0.7629 - val_loss: 0.6176 Epoch 79/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 266s 858ms/step - accuracy: 0.7566 - loss: 0.6626 - val_accuracy: 0.7669 - val_loss: 0.6236 Epoch 80/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 266s 859ms/step - accuracy: 0.7647 - loss: 0.6481 - val_accuracy: 0.7629 - val_loss: 0.6162 Epoch 81/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 264s 852ms/step - accuracy: 0.7577 - loss: 0.6584 - val_accuracy: 0.7694 - val_loss: 0.6177 Epoch 82/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 262s 847ms/step - accuracy: 0.7532 - loss: 0.6719 - val_accuracy: 0.7605 - val_loss: 0.6461 Epoch 83/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 258s 832ms/step - accuracy: 0.7508 - loss: 0.6718 - val_accuracy: 0.7726 - val_loss: 0.6211 Epoch 84/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 257s 829ms/step - accuracy: 0.7554 - loss: 0.6604 - val_accuracy: 0.7726 - val_loss: 0.6187 Epoch 85/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 258s 832ms/step - accuracy: 0.7677 - loss: 0.6498 - val_accuracy: 0.7641 - val_loss: 0.6293 Epoch 86/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 839ms/step - accuracy: 0.7630 - loss: 0.6585 - val_accuracy: 0.7758 - val_loss: 0.6011 Epoch 87/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 263s 848ms/step - accuracy: 0.7602 - loss: 0.6552 - val_accuracy: 0.7746 - val_loss: 0.6068 Epoch 88/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 838ms/step - accuracy: 0.7585 - loss: 0.6536 - val_accuracy: 0.7722 - val_loss: 0.6002 Epoch 89/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 260s 840ms/step - accuracy: 0.7622 - loss: 0.6478 - val_accuracy: 0.7766 - val_loss: 0.6077 Epoch 90/90 310/310 ━━━━━━━━━━━━━━━━━━━━ 259s 837ms/step - accuracy: 0.7637 - loss: 0.6485 - val_accuracy: 0.7730 - val_loss: 0.6140 78/78 
━━━━━━━━━━━━━━━━━━━━ 52s 668ms/step - accuracy: 0.7686 - loss: 0.6051 Validation loss: 0.6140414476394653, Validation accuracy: 0.7729838490486145 Fine-tuning ResNet101 Model... Epoch 1/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 329s 1s/step - accuracy: 0.5260 - loss: 11.7546 - val_accuracy: 0.6641 - val_loss: 1.0163 Epoch 2/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 319s 1s/step - accuracy: 0.6584 - loss: 0.9469 - val_accuracy: 0.7407 - val_loss: 0.7612 Epoch 3/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 321s 1s/step - accuracy: 0.7032 - loss: 0.8403 - val_accuracy: 0.7637 - val_loss: 0.6593 Epoch 4/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 318s 1s/step - accuracy: 0.7428 - loss: 0.7092 - val_accuracy: 0.7536 - val_loss: 0.7919 Epoch 5/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 315s 1s/step - accuracy: 0.7669 - loss: 0.6491 - val_accuracy: 0.6512 - val_loss: 1.1880 Epoch 6/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 316s 1s/step - accuracy: 0.7956 - loss: 0.6007 - val_accuracy: 0.7306 - val_loss: 0.7626 Epoch 7/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 315s 1s/step - accuracy: 0.7990 - loss: 0.5856 - val_accuracy: 0.7427 - val_loss: 0.7281 Epoch 8/40 310/310 ━━━━━━━━━━━━━━━━━━━━ 322s 1s/step - accuracy: 0.7967 - loss: 0.5876 - val_accuracy: 0.6411 - val_loss: 1.0406 Evaluating ResNet101 Model... 78/78 ━━━━━━━━━━━━━━━━━━━━ 53s 670ms/step Classification Report: precision recall f1-score support 0 0.66 0.76 0.71 417 1 0.91 0.96 0.94 629 2 0.85 0.61 0.71 201 3 0.81 0.44 0.57 319 4 0.88 0.71 0.78 309 5 0.64 0.82 0.72 605 accuracy 0.76 2480 macro avg 0.79 0.71 0.74 2480 weighted avg 0.78 0.76 0.76 2480
In [89]:
# Persist all three fine-tuned backbones to disk.
# NOTE: the legacy HDF5 (.h5) format triggers a Keras deprecation warning;
# the .h5 filenames are kept because later cells load these exact paths.
trained_backbones = [
    (inception_model, 'inception_v3_model_final_120ep.h5', 'InceptionV3'),
    (densenet_model, 'densenet169_model_final_120ep.h5', 'DenseNet169'),
    (resnet_model, 'resnet101_model_final_120ep.h5', 'ResNet101'),
]
for backbone, checkpoint_path, arch_name in trained_backbones:
    backbone.save(checkpoint_path)
    print(f"{arch_name} model saved as '{checkpoint_path}'.")
WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`.
WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`.
InceptionV3 model saved as 'inception_v3_model_final_120ep.h5'.
WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`.
DenseNet169 model saved as 'densenet169_model_final_120ep.h5'. ResNet101 model saved as 'resnet101_model_final_120ep.h5'.
In [19]:
# Restore the three fine-tuned backbones from their HDF5 checkpoints.
checkpoint_paths = {
    'InceptionV3': 'inception_v3_model_final_120ep.h5',
    'DenseNet169': 'densenet169_model_final_120ep.h5',
    'ResNet101': 'resnet101_model_final_120ep.h5',
}
inception_model = tf.keras.models.load_model(checkpoint_paths['InceptionV3'])
print("InceptionV3 model loaded.")
densenet_model = tf.keras.models.load_model(checkpoint_paths['DenseNet169'])
print("DenseNet169 model loaded.")
resnet_model = tf.keras.models.load_model(checkpoint_paths['ResNet101'])
print("ResNet101 model loaded.")
WARNING:absl:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
InceptionV3 model loaded.
WARNING:absl:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
DenseNet169 model loaded.
WARNING:absl:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
ResNet101 model loaded.
In [92]:
def evaluate_and_collect_grad_cam(model, X_val, y_val, model_name, layer_name):
    """
    Render Grad-CAM heatmaps for a few correctly and incorrectly classified
    validation images of a single model.

    Parameters:
        model: trained Keras model to explain.
        X_val: validation images (indexed by the instances collected below).
        y_val: one-hot validation labels; collapsed to class indices here.
        model_name: display name used in the printed banner.
        layer_name: name of the conv layer targeted by Grad-CAM.
    """
    true_labels = np.argmax(y_val, axis=-1)
    predicted_labels = np.argmax(model.predict(X_val), axis=-1)
    # collect_instances picks 2 correct and 2 incorrect examples per class.
    instances = collect_instances(true_labels, predicted_labels, num_correct=2, num_incorrect=2)
    print(f"Grad-CAM for {model_name}:")
    for class_label, example_indices in instances.items():
        # Same rendering path for both outcomes; only the banner text differs.
        for outcome_key, outcome_text in (('correct', 'Correct'), ('incorrect', 'Incorrect')):
            for sample_index in example_indices[outcome_key]:
                print(f"Class {class_label}: {outcome_text} Prediction")
                heatmap = apply_grad_cam(model, X_val[sample_index], layer_name)
                display_grad_cam(X_val[sample_index], heatmap)
# Final convolutional/feature layer of each backbone — the Grad-CAM target.
layer_names = {
    'DenseNet169': 'conv5_block32_1_relu',
    'InceptionV3': 'mixed10',
    'ResNet101': 'conv5_block3_out',
}
for cam_model, cam_name in ((densenet_model, 'DenseNet169'),
                            (inception_model, 'InceptionV3'),
                            (resnet_model, 'ResNet101')):
    evaluate_and_collect_grad_cam(cam_model, X_val, y_val, cam_name, layer_names[cam_name])
78/78 ━━━━━━━━━━━━━━━━━━━━ 24s 309ms/step Grad-CAM for DenseNet169: Class 0: Correct Prediction
Class 0: Correct Prediction
Class 0: Incorrect Prediction
Class 0: Incorrect Prediction
Class 1: Correct Prediction
Class 1: Correct Prediction
Class 1: Incorrect Prediction
Class 1: Incorrect Prediction
Class 2: Correct Prediction
Class 2: Correct Prediction
Class 2: Incorrect Prediction
Class 2: Incorrect Prediction
Class 3: Correct Prediction
Class 3: Correct Prediction
Class 3: Incorrect Prediction
Class 3: Incorrect Prediction
Class 4: Correct Prediction
Class 4: Correct Prediction
Class 4: Incorrect Prediction
Class 4: Incorrect Prediction
Class 5: Correct Prediction
Class 5: Correct Prediction
Class 5: Incorrect Prediction
Class 5: Incorrect Prediction
78/78 ━━━━━━━━━━━━━━━━━━━━ 12s 149ms/step Grad-CAM for InceptionV3: Class 0: Correct Prediction
Class 0: Correct Prediction
Class 0: Incorrect Prediction
Class 0: Incorrect Prediction
Class 1: Correct Prediction
Class 1: Correct Prediction
Class 1: Incorrect Prediction
Class 1: Incorrect Prediction
Class 2: Correct Prediction
Class 2: Correct Prediction
Class 2: Incorrect Prediction
Class 2: Incorrect Prediction
Class 3: Correct Prediction
Class 3: Correct Prediction
Class 3: Incorrect Prediction
Class 3: Incorrect Prediction
Class 4: Correct Prediction
Class 4: Correct Prediction
Class 4: Incorrect Prediction
Class 4: Incorrect Prediction
Class 5: Correct Prediction
Class 5: Correct Prediction
Class 5: Incorrect Prediction
Class 5: Incorrect Prediction
78/78 ━━━━━━━━━━━━━━━━━━━━ 39s 489ms/step Grad-CAM for ResNet101: Class 0: Correct Prediction
Class 0: Correct Prediction
Class 0: Incorrect Prediction
Class 0: Incorrect Prediction
Class 1: Correct Prediction
Class 1: Correct Prediction
Class 1: Incorrect Prediction
Class 1: Incorrect Prediction
Class 2: Correct Prediction
Class 2: Correct Prediction
Class 2: Incorrect Prediction
Class 2: Incorrect Prediction
Class 3: Correct Prediction
Class 3: Correct Prediction
Class 3: Incorrect Prediction
Class 3: Incorrect Prediction
Class 4: Correct Prediction
Class 4: Correct Prediction
Class 4: Incorrect Prediction
Class 4: Incorrect Prediction
Class 5: Correct Prediction
Class 5: Correct Prediction
Class 5: Incorrect Prediction
Class 5: Incorrect Prediction
Experiment 2: Ensemble Model using Majority Voting
In [27]:
# Hard (argmax) class predictions from each base model on the validation set.
predictions_inception = inception_model.predict(X_val).argmax(axis=1)
predictions_densenet = densenet_model.predict(X_val).argmax(axis=1)
predictions_resnet = resnet_model.predict(X_val).argmax(axis=1)
78/78 ━━━━━━━━━━━━━━━━━━━━ 12s 159ms/step 78/78 ━━━━━━━━━━━━━━━━━━━━ 27s 341ms/step 78/78 ━━━━━━━━━━━━━━━━━━━━ 46s 588ms/step
In [28]:
# Majority vote across the three base models. Each row of the stacked array
# is one model's predictions; scipy's `mode` over axis 0 picks the most
# frequent class per sample (ties resolve to the smallest class index).
combined_predictions = np.vstack((predictions_inception,
                                  predictions_densenet,
                                  predictions_resnet))
ensemble_predictions, _ = mode(combined_predictions, axis=0)
ensemble_predictions = ensemble_predictions.flatten()
In [96]:
def evaluate_ensemble_model(ensemble_predictions, y_val, num_classes=6):
    """
    Print a classification report and plot a confusion-matrix heatmap for
    ensemble predictions against the true validation labels.

    Parameters:
        ensemble_predictions: 1-D array of predicted class indices.
        y_val: true labels; a one-hot array is collapsed to class indices.
        num_classes: number of classes for the confusion-matrix axis labels
            (default 6, the number of fetal-plane classes in this dataset;
            previously this was hard-coded inside the function).
    """
    # Collapse one-hot labels to class indices if necessary.
    if len(y_val.shape) > 1 and y_val.shape[1] > 1:
        y_val = np.argmax(y_val, axis=-1)
    print("Shape of ensemble_predictions:", ensemble_predictions.shape)
    print("Sample of ensemble_predictions:\n", ensemble_predictions[:5])
    y_pred = ensemble_predictions
    print("Shape of y_val:", y_val.shape)
    print("Shape of y_pred:", y_pred.shape)
    # zero_division=1 matches the stacking-ensemble evaluators later in the
    # notebook and silences warnings for classes with no predicted samples.
    print("Classification Report:\n", classification_report(y_val, y_pred, zero_division=1))
    cm = confusion_matrix(y_val, y_pred)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=np.arange(num_classes),
                yticklabels=np.arange(num_classes))
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.title('Confusion Matrix')
    plt.show()
evaluate_ensemble_model(ensemble_predictions, y_val)
Shape of ensemble_predictions: (2480,)
Sample of ensemble_predictions:
[4 4 4 3 1]
Shape of y_val: (2480,)
Shape of y_pred: (2480,)
Classification Report:
precision recall f1-score support
0 0.80 0.88 0.84 417
1 0.97 0.98 0.98 629
2 0.88 0.79 0.83 201
3 0.91 0.76 0.83 319
4 0.92 0.86 0.89 309
5 0.81 0.87 0.84 605
accuracy 0.88 2480
macro avg 0.88 0.86 0.87 2480
weighted avg 0.88 0.88 0.88 2480
In [35]:
def evaluate_and_collect_grad_cam_ensemble(models, X_val, y_val, model_names, layer_names, ensemble_predictions):
    """
    Render Grad-CAM heatmaps from every base model for a few validation
    images the ensemble classified correctly and incorrectly.

    Parameters:
        models: list of trained Keras models (the ensemble members).
        X_val: validation images.
        y_val: one-hot validation labels; collapsed to class indices here.
        model_names: display names, in the same order as `models`.
        layer_names: Grad-CAM target layer per model, in the same order.
        ensemble_predictions: 1-D array of the ensemble's predicted classes.
    """
    true_labels = np.argmax(y_val, axis=-1)
    # 2 correct and 2 incorrect examples per class, based on ensemble output.
    instances = collect_instances(true_labels, ensemble_predictions, num_correct=2, num_incorrect=2)
    print("Grad-CAM for Ensemble Model:")
    for class_label, example_indices in instances.items():
        for outcome_key, outcome_text in (('correct', 'Correct'), ('incorrect', 'Incorrect')):
            for sample_index in example_indices[outcome_key]:
                print(f"Class {class_label}: {outcome_text} Prediction")
                # One heatmap per ensemble member for the same image.
                for member_model, _member_name, member_layer in zip(models, model_names, layer_names):
                    heatmap = apply_grad_cam(member_model, X_val[sample_index], member_layer)
                    display_grad_cam(X_val[sample_index], heatmap)
# Ensemble members and their Grad-CAM target layers, kept in matching order.
models = [densenet_model, inception_model, resnet_model]
model_names = ['DenseNet169', 'InceptionV3', 'ResNet101']
target_layer_by_model = {
    'DenseNet169': 'conv5_block32_1_relu',
    'InceptionV3': 'mixed10',
    'ResNet101': 'conv5_block3_out',
}
layer_names = [target_layer_by_model[name] for name in model_names]
evaluate_and_collect_grad_cam_ensemble(models, X_val, y_val, model_names, layer_names, ensemble_predictions)
Grad-CAM for Ensemble Model: Class 0: Correct Prediction
Class 0: Correct Prediction
Class 0: Incorrect Prediction
Class 0: Incorrect Prediction
Class 1: Correct Prediction
Class 1: Correct Prediction
Class 1: Incorrect Prediction
Class 1: Incorrect Prediction
Class 2: Correct Prediction
Class 2: Correct Prediction
Class 2: Incorrect Prediction
Class 2: Incorrect Prediction
Class 3: Correct Prediction
Class 3: Correct Prediction
Class 3: Incorrect Prediction
Class 3: Incorrect Prediction
Class 4: Correct Prediction
Class 4: Correct Prediction
Class 4: Incorrect Prediction
Class 4: Incorrect Prediction
Class 5: Correct Prediction
Class 5: Correct Prediction
Class 5: Incorrect Prediction
Class 5: Incorrect Prediction
Experiment 3: Stacking Ensemble with XGBoost and RandomForest
In [108]:
def train_stacking_ensemble(predictions_inception, predictions_densenet, predictions_resnet, y_val, num_classes=6):
    """
    Trains a stacking ensemble using XGBoost as the meta-learner and returns
    ensemble predictions as integer class labels.

    Parameters:
        predictions_inception / predictions_densenet / predictions_resnet:
            1-D arrays of each base model's predicted class indices.
        y_val: true labels; a one-hot array is collapsed to class indices.
        num_classes: number of target classes for `multi:softmax`.

    NOTE(review): the meta-learner is trained and then evaluated on the same
    meta-features (no held-out meta set), so downstream accuracy figures are
    optimistic — consider a separate split for the meta-learner.
    """
    # Meta-features: one column per base model's hard predictions.
    X_meta = np.column_stack([predictions_inception, predictions_densenet, predictions_resnet])
    if len(y_val.shape) > 1 and y_val.shape[1] > 1:
        y_val = np.argmax(y_val, axis=-1)
    dtrain = xgb.DMatrix(X_meta, label=y_val)
    params = {
        'objective': 'multi:softmax',  # predict hard class labels directly
        'num_class': num_classes,
        'eval_metric': 'mlogloss',
    }
    bst = xgb.train(params, dtrain, num_boost_round=100)
    dtest = xgb.DMatrix(X_meta)
    # XGBoost returns float labels for multi:softmax (the notebook output
    # shows e.g. [4. 4. 4. 3. 1.]); cast to int so the result matches the
    # integer predictions produced by the other ensembles.
    ensemble_predictions = bst.predict(dtest).astype(int)
    return ensemble_predictions
def evaluate_ensemble_model(ensemble_predictions, y_val, num_classes=6):
    """
    Evaluates ensemble model predictions against true labels (y_val) using
    classification metrics and a confusion-matrix heatmap.

    Parameters:
        ensemble_predictions: 1-D array of predicted class indices.
        y_val: true labels; a one-hot array is collapsed to class indices.
        num_classes: number of classes used for the confusion-matrix axes.

    NOTE: redefines the evaluator from the majority-voting experiment.
    """
    is_one_hot = len(y_val.shape) > 1 and y_val.shape[1] > 1
    if is_one_hot:
        y_val = np.argmax(y_val, axis=-1)
    print("Shape of ensemble_predictions:", ensemble_predictions.shape)
    print("Sample of ensemble_predictions:\n", ensemble_predictions[:5])
    y_pred = ensemble_predictions
    print("Shape of y_val:", y_val.shape)
    print("Shape of y_pred:", y_pred.shape)
    # zero_division=1 silences warnings for classes with no predictions.
    report = classification_report(y_val, y_pred, zero_division=1)
    print("Classification Report:\n", report)
    confusion = confusion_matrix(y_val, y_pred)
    class_ticks = np.arange(num_classes)
    plt.figure(figsize=(8, 6))
    sns.heatmap(confusion, annot=True, fmt='d', cmap='Blues',
                xticklabels=class_ticks, yticklabels=class_ticks)
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.title('Confusion Matrix')
    plt.show()
ensemble_predictions = train_stacking_ensemble(predictions_inception, predictions_densenet, predictions_resnet, y_val, num_classes=6)
evaluate_ensemble_model(ensemble_predictions, y_val, num_classes=6)
Shape of ensemble_predictions: (2480,)
Sample of ensemble_predictions:
[4. 4. 4. 3. 1.]
Shape of y_val: (2480,)
Shape of y_pred: (2480,)
Classification Report:
precision recall f1-score support
0 0.88 0.92 0.90 417
1 0.99 0.98 0.99 629
2 0.87 0.85 0.86 201
3 0.86 0.94 0.90 319
4 0.94 0.87 0.90 309
5 0.88 0.86 0.87 605
accuracy 0.91 2480
macro avg 0.90 0.90 0.90 2480
weighted avg 0.91 0.91 0.91 2480
In [39]:
# Grad-CAMs for examples the XGBoost stacking ensemble got right and wrong.
evaluate_and_collect_grad_cam_ensemble(models, X_val, y_val, model_names, layer_names,ensemble_predictions)
Grad-CAM for Ensemble Model: Class 0: Correct Prediction
Class 0: Correct Prediction
Class 0: Incorrect Prediction
Class 0: Incorrect Prediction
Class 1: Correct Prediction
Class 1: Correct Prediction
Class 1: Incorrect Prediction
Class 1: Incorrect Prediction
Class 2: Correct Prediction
Class 2: Correct Prediction
Class 2: Incorrect Prediction
Class 2: Incorrect Prediction
Class 3: Correct Prediction
Class 3: Correct Prediction
Class 3: Incorrect Prediction
Class 3: Incorrect Prediction
Class 4: Correct Prediction
Class 4: Correct Prediction
Class 4: Incorrect Prediction
Class 4: Incorrect Prediction
Class 5: Correct Prediction
Class 5: Correct Prediction
Class 5: Incorrect Prediction
Class 5: Incorrect Prediction
In [41]:
def train_stacking_ensemble_rf(predictions_inception, predictions_densenet, predictions_resnet, y_val, num_classes=6):
    """
    Trains a stacking ensemble using Random Forest as the meta-learner and
    returns ensemble predictions.

    Parameters:
        predictions_inception / predictions_densenet / predictions_resnet:
            1-D arrays of each base model's predicted class indices.
        y_val: true labels; a one-hot array is collapsed to class indices.
        num_classes: accepted for interface symmetry with the XGBoost
            variant; RandomForestClassifier infers the classes itself.

    NOTE(review): fits and predicts on the same meta-features, so downstream
    accuracy is optimistic — consider a held-out meta set.
    """
    meta_features = np.column_stack([predictions_inception, predictions_densenet, predictions_resnet])
    if len(y_val.shape) > 1 and y_val.shape[1] > 1:
        y_val = np.argmax(y_val, axis=-1)
    meta_learner = RandomForestClassifier(n_estimators=100, random_state=42)
    meta_learner.fit(meta_features, y_val)
    ensemble_predictions = meta_learner.predict(meta_features)
    return ensemble_predictions
def evaluate_ensemble_model(ensemble_predictions, y_val, num_classes=6):
    """
    Report classification metrics and plot a confusion-matrix heatmap for
    ensemble predictions against the true validation labels.

    Accepts y_val either as integer class indices or one-hot encoded; the
    latter is reduced with argmax before scoring.
    """
    true_labels = y_val
    if true_labels.ndim > 1 and true_labels.shape[1] > 1:
        true_labels = np.argmax(true_labels, axis=-1)
    print("Shape of ensemble_predictions:", ensemble_predictions.shape)
    print("Sample of ensemble_predictions:\n", ensemble_predictions[:5])
    y_pred = ensemble_predictions
    print("Shape of y_val:", true_labels.shape)
    print("Shape of y_pred:", y_pred.shape)
    # zero_division=1 avoids warnings/zeros when a class gets no predictions.
    print("Classification Report:\n", classification_report(true_labels, y_pred, zero_division=1))
    conf_mat = confusion_matrix(true_labels, y_pred)
    class_ticks = np.arange(num_classes)
    plt.figure(figsize=(8, 6))
    sns.heatmap(conf_mat, annot=True, fmt='d', cmap='Blues',
                xticklabels=class_ticks,
                yticklabels=class_ticks)
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.title('Confusion Matrix')
    plt.show()
# Fit the Random-Forest stacking ensemble on the base models' validation
# predictions and report its metrics (functions defined just above).
ensemble_predictions = train_stacking_ensemble_rf(predictions_inception, predictions_densenet, predictions_resnet, y_val, num_classes=6)
evaluate_ensemble_model(ensemble_predictions, y_val, num_classes=6)
Shape of ensemble_predictions: (2480,)
Sample of ensemble_predictions:
[4 4 4 3 1]
Shape of y_val: (2480,)
Shape of y_pred: (2480,)
Classification Report:
precision recall f1-score support
0 0.87 0.92 0.89 417
1 0.99 0.99 0.99 629
2 0.88 0.84 0.86 201
3 0.87 0.94 0.90 319
4 0.94 0.87 0.90 309
5 0.88 0.86 0.87 605
accuracy 0.91 2480
macro avg 0.90 0.90 0.90 2480
weighted avg 0.91 0.91 0.91 2480
In [43]:
evaluate_and_collect_grad_cam_ensemble(models, X_val, y_val, model_names, layer_names,ensemble_predictions)
Grad-CAM for Ensemble Model: Class 0: Correct Prediction
Class 0: Correct Prediction
Class 0: Incorrect Prediction
Class 0: Incorrect Prediction
Class 1: Correct Prediction
Class 1: Correct Prediction
Class 1: Incorrect Prediction
Class 1: Incorrect Prediction
Class 2: Correct Prediction
Class 2: Correct Prediction
Class 2: Incorrect Prediction
Class 2: Incorrect Prediction
Class 3: Correct Prediction
Class 3: Correct Prediction
Class 3: Incorrect Prediction
Class 3: Incorrect Prediction
Class 4: Correct Prediction
Class 4: Correct Prediction
Class 4: Incorrect Prediction
Class 4: Incorrect Prediction
Class 5: Correct Prediction
Class 5: Correct Prediction
Class 5: Incorrect Prediction
Class 5: Incorrect Prediction
Experiment 4: Soft Voting Ensemble
In [124]:
# Class-probability predictions from each fine-tuned base model on the
# validation set; these feed the soft-voting ensemble below.
predictions_inception_proba = inception_model.predict(X_val)
predictions_densenet_proba = densenet_model.predict(X_val)
predictions_resnet_proba = resnet_model.predict(X_val)
def soft_voting_ensemble(predictions_inception_proba, predictions_densenet_proba, predictions_resnet_proba):
    """
    Soft-voting ensemble: average the class-probability matrices of the
    three base models and return the arg-max class index per sample.

    Each input is a (n_samples, n_classes) probability array; the return
    value is an (n_samples,) array of integer class predictions.
    """
    stacked = np.stack(
        [predictions_inception_proba, predictions_densenet_proba, predictions_resnet_proba]
    )
    mean_proba = stacked.mean(axis=0)
    return np.argmax(mean_proba, axis=1)
ensemble_predictions = soft_voting_ensemble(predictions_inception_proba, predictions_densenet_proba, predictions_resnet_proba)
def evaluate_ensemble_model(ensemble_predictions, y_val, num_classes=6):
    """
    Print a classification report and draw a confusion-matrix heatmap for
    the ensemble predictions.

    NOTE(review): this redefines the evaluate_ensemble_model used by the
    stacking experiments above (this version omits the y_pred shape
    printout and the zero_division setting) — consider keeping a single
    definition.
    """
    true_labels = y_val
    if true_labels.ndim > 1 and true_labels.shape[1] > 1:
        # One-hot labels -> integer class indices.
        true_labels = np.argmax(true_labels, axis=-1)
    print("Shape of ensemble_predictions:", ensemble_predictions.shape)
    print("Sample of ensemble_predictions:\n", ensemble_predictions[:5])
    print("Classification Report:\n", classification_report(true_labels, ensemble_predictions))
    conf_mat = confusion_matrix(true_labels, ensemble_predictions)
    plt.figure(figsize=(8, 6))
    sns.heatmap(conf_mat, annot=True, fmt='d', cmap='Blues',
                xticklabels=np.arange(num_classes), yticklabels=np.arange(num_classes))
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.title('Confusion Matrix')
    plt.show()
evaluate_ensemble_model(ensemble_predictions, y_val)
78/78 ━━━━━━━━━━━━━━━━━━━━ 11s 146ms/step 78/78 ━━━━━━━━━━━━━━━━━━━━ 24s 303ms/step 78/78 ━━━━━━━━━━━━━━━━━━━━ 40s 508ms/step Shape of ensemble_predictions: (2480,) Sample of ensemble_predictions: [4 4 4 3 1] Classification Report: precision recall f1-score support 0 0.89 0.89 0.89 417 1 0.98 0.98 0.98 629 2 0.90 0.84 0.87 201 3 0.90 0.89 0.90 319 4 0.93 0.89 0.91 309 5 0.84 0.89 0.86 605 accuracy 0.91 2480 macro avg 0.91 0.90 0.90 2480 weighted avg 0.91 0.91 0.91 2480
In [49]:
evaluate_and_collect_grad_cam_ensemble(models, X_val, y_val, model_names, layer_names,ensemble_predictions)
Grad-CAM for Ensemble Model: Class 0: Correct Prediction
Class 0: Correct Prediction
Class 0: Incorrect Prediction
Class 0: Incorrect Prediction
Class 1: Correct Prediction
Class 1: Correct Prediction
Class 1: Incorrect Prediction
Class 1: Incorrect Prediction
Class 2: Correct Prediction
Class 2: Correct Prediction
Class 2: Incorrect Prediction
Class 2: Incorrect Prediction
Class 3: Correct Prediction
Class 3: Correct Prediction
Class 3: Incorrect Prediction
Class 3: Incorrect Prediction
Class 4: Correct Prediction
Class 4: Correct Prediction
Class 4: Incorrect Prediction
Class 4: Incorrect Prediction
Class 5: Correct Prediction
Class 5: Correct Prediction
Class 5: Incorrect Prediction
Class 5: Incorrect Prediction
Experiment 5: Multi-Layer Perceptron approach, as described in the research paper
In [15]:
def create_feature_extractor(base_model):
    """
    Freeze a pretrained backbone and cap it with global average pooling,
    yielding a model that maps images to flat feature vectors.

    Parameters
    ----------
    base_model : keras.Model
        A convolutional backbone (e.g. InceptionV3 without its top).

    Returns
    -------
    keras.Model producing pooled feature vectors.
    """
    base_model.trainable = False  # keep the pretrained weights fixed
    pooled = layers.GlobalAveragePooling2D()(base_model.output)
    return models.Model(inputs=base_model.input, outputs=pooled)
# Frozen ImageNet backbones (tops removed, 128x128 RGB inputs) used as
# fixed feature extractors for the MLP experiment.
inception_feature_extractor = create_feature_extractor(InceptionV3(weights='imagenet', include_top=False, input_shape=(128, 128, 3)))
densenet_feature_extractor = create_feature_extractor(DenseNet169(weights='imagenet', include_top=False, input_shape=(128, 128, 3)))
def extract_combined_features(feature_extractors, X):
    """
    Run X through each extractor and concatenate the resulting feature
    vectors along the feature axis.

    Parameters
    ----------
    feature_extractors : list
        Objects exposing .predict(X) that return (n_samples, k_i) arrays.
    X : np.ndarray
        Input batch shared by all extractors.

    Returns
    -------
    np.ndarray of shape (n_samples, sum of k_i).
    """
    per_model = [extractor.predict(X) for extractor in feature_extractors]
    return np.concatenate(per_model, axis=1)
# Extract and concatenate InceptionV3 + DenseNet169 features for the
# training and validation sets; these are the MLP's inputs.
X_train_combined = extract_combined_features([inception_feature_extractor, densenet_feature_extractor], X_train)
X_val_combined = extract_combined_features([inception_feature_extractor, densenet_feature_extractor], X_val)
def create_mlp_model(input_shape, num_classes=6):
    """
    Build the MLP classifier head: two ReLU hidden layers (1024, 512),
    each followed by 0.5 dropout, and a softmax output over num_classes.

    Parameters
    ----------
    input_shape : tuple
        Shape of one combined feature vector, e.g. (n_features,).
    num_classes : int
        Number of output classes (default 6).

    Returns
    -------
    An uncompiled keras Sequential model.
    """
    model = models.Sequential()
    # layers.Input replaces InputLayer(input_shape=...): the `input_shape`
    # argument is deprecated in Keras 3 and emitted a UserWarning at fit time
    # (visible in this notebook's training output).
    model.add(layers.Input(shape=input_shape))
    model.add(layers.Dense(1024, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(num_classes, activation='softmax'))
    return model
mlp_model = create_mlp_model(input_shape=(X_train_combined.shape[1],))
def train_and_evaluate_mlp_model(model, X_train, y_train, X_val, y_val, epochs=50, batch_size=32):
    """
    Compile, fit, and evaluate the MLP on the combined features.

    Uses categorical_crossentropy, which expects one-hot encoded labels
    (the original inline comment mentioning sparse categorical
    crossentropy was misleading — the one-hot variant is what is used).

    Returns
    -------
    (trained model, validation accuracy)
    """
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(X_train, y_train,
              validation_data=(X_val, y_val),
              epochs=epochs,
              batch_size=batch_size)
    val_loss, val_accuracy = model.evaluate(X_val, y_val)
    print(f'Validation loss: {val_loss}, Validation accuracy: {val_accuracy}')
    return model, val_accuracy
mlp_model, mlp_accuracy = train_and_evaluate_mlp_model(mlp_model, X_train_combined, y_train, X_val_combined, y_val)
def evaluate_mlp_model(model, X_val, y_val):
    """
    Print a classification report and plot a confusion-matrix heatmap for
    the MLP's predictions on the validation set.
    """
    true_labels = y_val
    if true_labels.ndim > 1 and true_labels.shape[1] > 1:
        # One-hot labels -> integer class indices.
        true_labels = np.argmax(true_labels, axis=-1)
    # Predicted class = arg-max over the softmax outputs.
    predicted = np.argmax(model.predict(X_val), axis=-1)
    print("Classification Report:\n", classification_report(true_labels, predicted))
    conf_mat = confusion_matrix(true_labels, predicted)
    plt.figure(figsize=(8, 6))
    sns.heatmap(conf_mat, annot=True, fmt='d', cmap='Blues',
                xticklabels=np.arange(6), yticklabels=np.arange(6))
    plt.ylabel('Actual')
    plt.xlabel('Predicted')
    plt.title('Confusion Matrix')
    plt.show()
evaluate_mlp_model(mlp_model, X_val_combined, y_val)
310/310 ━━━━━━━━━━━━━━━━━━━━ 52s 159ms/step 310/310 ━━━━━━━━━━━━━━━━━━━━ 110s 347ms/step 78/78 ━━━━━━━━━━━━━━━━━━━━ 16s 200ms/step 78/78 ━━━━━━━━━━━━━━━━━━━━ 36s 462ms/step Epoch 1/50
/opt/anaconda3/lib/python3.12/site-packages/keras/src/layers/core/input_layer.py:26: UserWarning: Argument `input_shape` is deprecated. Use `shape` instead. warnings.warn(
310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 14ms/step - accuracy: 0.5781 - loss: 1.9110 - val_accuracy: 0.8109 - val_loss: 0.5643 Epoch 2/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.7580 - loss: 0.6875 - val_accuracy: 0.8242 - val_loss: 0.5192 Epoch 3/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.7840 - loss: 0.6153 - val_accuracy: 0.8419 - val_loss: 0.4778 Epoch 4/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.8088 - loss: 0.5648 - val_accuracy: 0.8246 - val_loss: 0.4838 Epoch 5/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8111 - loss: 0.5579 - val_accuracy: 0.8480 - val_loss: 0.4381 Epoch 6/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8200 - loss: 0.5056 - val_accuracy: 0.8629 - val_loss: 0.4213 Epoch 7/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8331 - loss: 0.4719 - val_accuracy: 0.8560 - val_loss: 0.4331 Epoch 8/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8445 - loss: 0.4425 - val_accuracy: 0.8560 - val_loss: 0.4308 Epoch 9/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8573 - loss: 0.4115 - val_accuracy: 0.8746 - val_loss: 0.3946 Epoch 10/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8593 - loss: 0.4044 - val_accuracy: 0.8645 - val_loss: 0.4054 Epoch 11/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8625 - loss: 0.4051 - val_accuracy: 0.8665 - val_loss: 0.3882 Epoch 12/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8670 - loss: 0.3870 - val_accuracy: 0.8694 - val_loss: 0.3825 Epoch 13/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8605 - loss: 0.3968 - val_accuracy: 0.8706 - val_loss: 0.3923 Epoch 14/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8679 - loss: 0.3810 - val_accuracy: 0.8685 - val_loss: 0.4064 Epoch 15/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8677 - loss: 0.3701 - val_accuracy: 0.8617 - val_loss: 0.4052 Epoch 16/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 
0.8787 - loss: 0.3656 - val_accuracy: 0.8806 - val_loss: 0.3864 Epoch 17/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8782 - loss: 0.3438 - val_accuracy: 0.8585 - val_loss: 0.4206 Epoch 18/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8848 - loss: 0.3330 - val_accuracy: 0.8786 - val_loss: 0.3821 Epoch 19/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.8850 - loss: 0.3428 - val_accuracy: 0.8782 - val_loss: 0.3789 Epoch 20/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.8924 - loss: 0.3130 - val_accuracy: 0.8504 - val_loss: 0.4298 Epoch 21/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8863 - loss: 0.3348 - val_accuracy: 0.8790 - val_loss: 0.3791 Epoch 22/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.8825 - loss: 0.3446 - val_accuracy: 0.8770 - val_loss: 0.3837 Epoch 23/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8922 - loss: 0.3119 - val_accuracy: 0.8714 - val_loss: 0.3866 Epoch 24/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.8937 - loss: 0.3203 - val_accuracy: 0.8810 - val_loss: 0.3875 Epoch 25/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8905 - loss: 0.3339 - val_accuracy: 0.8653 - val_loss: 0.3969 Epoch 26/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.8940 - loss: 0.3192 - val_accuracy: 0.8786 - val_loss: 0.3833 Epoch 27/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 13ms/step - accuracy: 0.9034 - loss: 0.2935 - val_accuracy: 0.8738 - val_loss: 0.3906 Epoch 28/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.8999 - loss: 0.3087 - val_accuracy: 0.8778 - val_loss: 0.3951 Epoch 29/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.8998 - loss: 0.3006 - val_accuracy: 0.8827 - val_loss: 0.3918 Epoch 30/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.8989 - loss: 0.3025 - val_accuracy: 0.8815 - val_loss: 0.3862 Epoch 31/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 14ms/step - accuracy: 0.9113 - loss: 0.2721 - val_accuracy: 0.8609 - 
val_loss: 0.4373 Epoch 32/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.9082 - loss: 0.2666 - val_accuracy: 0.8802 - val_loss: 0.3951 Epoch 33/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9110 - loss: 0.2776 - val_accuracy: 0.8766 - val_loss: 0.4125 Epoch 34/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9047 - loss: 0.2911 - val_accuracy: 0.8681 - val_loss: 0.4078 Epoch 35/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 18ms/step - accuracy: 0.9056 - loss: 0.2762 - val_accuracy: 0.8738 - val_loss: 0.4214 Epoch 36/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9166 - loss: 0.2571 - val_accuracy: 0.8690 - val_loss: 0.4516 Epoch 37/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9187 - loss: 0.2469 - val_accuracy: 0.8661 - val_loss: 0.4266 Epoch 38/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9041 - loss: 0.2752 - val_accuracy: 0.8774 - val_loss: 0.4269 Epoch 39/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9081 - loss: 0.2774 - val_accuracy: 0.8730 - val_loss: 0.4135 Epoch 40/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.9042 - loss: 0.2850 - val_accuracy: 0.8645 - val_loss: 0.4419 Epoch 41/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.9187 - loss: 0.2495 - val_accuracy: 0.8798 - val_loss: 0.4082 Epoch 42/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9155 - loss: 0.2548 - val_accuracy: 0.8685 - val_loss: 0.4349 Epoch 43/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.9114 - loss: 0.2537 - val_accuracy: 0.8722 - val_loss: 0.3958 Epoch 44/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.9011 - loss: 0.2771 - val_accuracy: 0.8746 - val_loss: 0.4294 Epoch 45/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9125 - loss: 0.2544 - val_accuracy: 0.8758 - val_loss: 0.4221 Epoch 46/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 4s 14ms/step - accuracy: 0.9098 - loss: 0.2688 - val_accuracy: 0.8714 - val_loss: 0.4677 Epoch 47/50 310/310 
━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9178 - loss: 0.2403 - val_accuracy: 0.8786 - val_loss: 0.4337 Epoch 48/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9169 - loss: 0.2415 - val_accuracy: 0.8685 - val_loss: 0.4651 Epoch 49/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 15ms/step - accuracy: 0.9097 - loss: 0.2701 - val_accuracy: 0.8726 - val_loss: 0.4429 Epoch 50/50 310/310 ━━━━━━━━━━━━━━━━━━━━ 5s 16ms/step - accuracy: 0.9130 - loss: 0.2605 - val_accuracy: 0.8774 - val_loss: 0.4378 78/78 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8842 - loss: 0.4077 Validation loss: 0.43775251507759094, Validation accuracy: 0.8774193525314331 78/78 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step Classification Report: precision recall f1-score support 0 0.85 0.87 0.86 417 1 0.99 0.97 0.98 629 2 0.86 0.78 0.82 201 3 0.88 0.79 0.83 319 4 0.95 0.86 0.90 309 5 0.77 0.88 0.82 605 accuracy 0.88 2480 macro avg 0.88 0.86 0.87 2480 weighted avg 0.88 0.88 0.88 2480